Note. Boxplots display the interquartile range (IQR, center box), and the whiskers extend 1.5*IQR from the lower and upper hinge. The white point indicates the mean and the white center line indicates the median.
In a first step we import the raw Qualtrics data, which was downloaded as an SPSS file.
# Reset working directory to folder current file is saved in
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Import Qualtrics Survey Data
# read_spss() (haven) preserves SPSS variable/value labels as 'labelled' columns,
# which the filter flags below rely on.
dt0Raw <- read_spss("data/raw data/Snowball+Corona+Long-Term+Coverage+-+Baseline_April+1,+2020_07.26.sav")
The raw data set includes 660 variables for 22140 cases.
Filter the Preview responses.
# flag Preview Responses
# Qualtrics 'Status' == 0 marks a regular response; anything else (e.g. survey
# previews) gets FilterPreview = 1 so it can be excluded later.
dt1Preview <- dt0Raw %>%
mutate(FilterPreview = labelled(ifelse(Status == 0,0,1),
labels = c(preview = 1), label="Filter: survey preview response"))
Inspecting missing data in the items.
# Table: Missing Data per item
# Interactive DT table of per-variable missingness (count + percentage),
# excluding timers (t_*) and political orientation (Pol*), which are missing
# by design across translations.
dt1Preview %>%
dplyr::select(-starts_with("t_"), -starts_with("Pol")) %>% #drop timers and Political orientation (because of translation missingness)
dplyr::select_if(~sum(is.na(.)) > 0) %>% # remove all variables that have no missingness
naniar::miss_var_summary(.) %>% # by-variable summary of missingness proportion
DT::datatable(.,
colnames = c("Variable", "Number Missing", "Percentage Missing"),
filter = 'top',
extensions = 'Buttons',
options = list(
columnDefs = list(list(className = 'dt-center')),
#autoWidth = TRUE,
dom = 'Bfrtlip',
buttons = c('copy', 'csv', 'excel', 'pdf', 'print'))) %>%
DT::formatRound('pct_miss', digits = 2)
# Plot: Missing Data per item
# Bar chart version of the per-variable missingness summary (same column subset).
dt1Preview %>%
dplyr::select(-starts_with("t_"), -starts_with("Pol")) %>% #drop timers and Political orientation (because of translation missingness)
dplyr::select_if(~sum(is.na(.)) > 0) %>% # remove all variables that have no missingness
naniar::gg_miss_var(.) # visualize by-variable summary of missingness proportion
# Plot: Missing Data cumulative
# Cumulative missingness in variable (survey) order, to spot where in the
# questionnaire drop-out accumulates.
dt1Preview %>%
dplyr::select(-starts_with("t_"), -starts_with("Pol")) %>% #drop timers and Political orientation (because of translation missingness)
dplyr::select_if(~sum(is.na(.)) > 0) %>% # remove all variables that have no missingness
naniar::gg_miss_var_cumsum(.) # missingness development over survey
# Co-occurences of missingess - too many variables
#dt0Raw %>%
# dplyr::select(-starts_with("t_"), -starts_with("Pol")) %>% #drop timers and Political orientation (because of translation missingness)
# dplyr::select_if(~sum(is.na(.)) > 0) %>% # remove all variables that have no missingess
# naniar::gg_miss_upset(., nsets = n_var_miss(.)) # visualize missingess co-occurences
# set progress cut-off criterion (note: this is survey progress, not duration):
progressCutOff <- 97 #cut-off criterion in percent
# flag responses below the cut-off and compute the resulting data loss
progressFilter <- dt1Preview %>%
dplyr::select(Progress) %>%
mutate(out = Progress < progressCutOff)
progressCutOffPerc <- round(sum(progressFilter$out)/nrow(progressFilter)*100,2) # percent of responses dropped with current cut-off criterion
# plot histogram of survey progress, with the cut-off and resulting data loss marked
# annotate() draws each label exactly once; the previous geom_text(aes(<constants>))
# redrew the same label once per data row (identical render, wasted work).
ggplot(data=progressFilter, aes(x=Progress, fill=out)) +
geom_histogram(bins=50,
alpha=.6) +
geom_vline(xintercept = progressCutOff,
color = "darkred",
linetype = "longdash") +
annotate("text", x = progressCutOff, y = Inf,
label = paste0("Progress cut-off: ",progressCutOff,"%\n"),
hjust = 1, colour = "darkred", angle = 90) +
annotate("text", x = progressCutOff, y = Inf,
label = paste0("\ndata loss: ",progressCutOffPerc,"%"),
hjust = 1, colour = "darkred", angle = 90) +
#scale_x_continuous(breaks = seq(0, 100,3)) +
scale_fill_manual(values=c("darkgrey","darkred")) +
labs(title = "Histogram: Survey Progress",
x = "Survey Progress [Percent completed]",
y = "Frequency Count") +
theme_Publication() +
theme(legend.position = "none")
# flag responses below the progress cut-off (i.e. did not reach the debriefing)
# (previous comment wrongly referred to the 5-minute duration filter below)
dt2Progress <- dt1Preview %>%
mutate(FilterProgress = labelled(ifelse(Progress < progressCutOff,1,0),
# value label fixed: was `consent` (copy-paste error) although the
# variable label describes an incomplete-progress filter
labels = c(`incomplete` = 1), label="Filter: Did not see debriefing"))
rm(progressFilter, progressCutOff, progressCutOffPerc)
Filter survey responses that were shorter than 5 minutes.
# truncate data:
# Drop extreme long durations (> Median + 3.5*MAD) so the histogram below is readable.
tOutlierHigh <- dt2Progress %>%
dplyr::select(Duration__in_seconds_) %>%
filter(Duration__in_seconds_<=stats::median(Duration__in_seconds_)+stats::mad(Duration__in_seconds_)*3.5) %>%
mutate(Minutes = Duration__in_seconds_/60)
# set time cut-off criterion:
tCutOff <- 5 #cut-off criterion in minutes
# CJ: This might be a bit strict, I suspect that I completed it in <10 minutes.
# NOTE: denominator is the full (untruncated) sample, so this is the share of ALL responses.
tCutOffPerc <- round(sum(tOutlierHigh$Minutes<tCutOff)/nrow(dt2Progress)*100,2) # percent of responses dropped with current cut-off criterion
tOutlierHigh$out <- tOutlierHigh$Minutes < tCutOff
# plot histogram of (truncated) survey duration, with the time cut-off marked
# annotate() draws each label exactly once; the previous geom_text(aes(<constants>))
# redrew the same label once per data row (identical render, wasted work).
ggplot(data=tOutlierHigh, aes(x=Minutes, fill=out)) +
geom_histogram(bins=round(max(tOutlierHigh$Minutes),0),
alpha=.6) +
geom_vline(xintercept = tCutOff,
color = "darkred",
linetype = "longdash") +
annotate("text", x = tCutOff, y = Inf,
label = paste0("time cut-off: ",tCutOff," Minutes\n"),
hjust = 1, colour = "darkred", angle = 90) +
annotate("text", x = tCutOff, y = Inf,
label = paste0("\ndata loss: ",tCutOffPerc,"%"),
hjust = 1, colour = "darkred", angle = 90) +
scale_x_continuous(breaks = seq(0, round(max(tOutlierHigh$Minutes),0), 5)) +
scale_fill_manual(values=c("darkgrey","darkred")) +
labs(title = "Truncated Histogram: Survey Duration",
x = "Duration [Minutes]", # typo fixed: was "Mintues"
y = "Frequency Count",
caption = "Notes:
(1) Truncated: all participants who took less time than Median+3.5*MAD
(2) Each bin represents one Minute") +
theme_Publication() +
theme(legend.position = "none")
# flag anyone with less than 5 minutes survey duration
# NOTE(review): the condition uses '>' so a duration of exactly tCutOff*60
# seconds (300s) is also flagged 1, i.e. the filter is "<= 5 minutes" —
# slightly stricter than the label text. Confirm the intended boundary.
dt3Time <- dt2Progress %>%
mutate(FilterTime = labelled(ifelse(Duration__in_seconds_ > tCutOff*60,0,1),
labels = c(`extremely quick` = 1), label="Filter: Took less than 5 minutes on survey"))
rm(tOutlierHigh, tCutOff, tCutOffPerc)
Filter participants who straightlined on the job insecurity scale, which includes a reverse-coded item. We only flag people who straightlined outside the median category, because an all-“neither agree nor disagree” pattern might be a meaningful response.
# Check missingness pattern
# Upset plot of co-occurring missingness across the three job insecurity items;
# -99 (presumably a survey missing-data code — confirm) is converted to NA first.
naniar::gg_miss_upset(dt3Time %>%
dplyr::select(ResponseId, jbInsec01, jbInsec02, jbInsec03) %>%
na_if(., -99) # all -99 into <NA>
)
# isolate respondents who have straightlined outside the median category (b/c all "neither agree nor disagree" might be a meaningful response)
# Straightlining = zero within-person variance (sd == 0) across the three items;
# mean != 0 spares the all-midpoint pattern, which may be substantive.
jobinsecRed <- dt3Time %>%
dplyr::select(ResponseId, jbInsec01, jbInsec02, jbInsec03) %>%
na_if(., -99) %>% # all -99 into <NA>
na.omit() %>% # remove people who have missing data on one of the three items
mutate(mean = rowMeans(dplyr::select(., c("jbInsec01", "jbInsec02", "jbInsec03"))),
sd = matrixStats::rowSds(as.matrix(dplyr::select(., c("jbInsec01", "jbInsec02", "jbInsec03"))))) %>% # calculate row-means and row-sds
filter(sd == 0, mean != 0)
# flag anyone who straightlined on job insecurity
# (1 = ResponseId appears in the straightliner subset isolated above)
dt4Straightliner <- dt3Time %>%
mutate(FilterStraightliner = labelled(ifelse(!ResponseId %in% jobinsecRed$ResponseId,0,1),
labels = c(straightliner = 1), label="Filter: straightliner on Job Insecurity"))
rm(jobinsecRed)
Note: For each of the scales we conduct an item analysis and combine the items into mean scores (.m) and factor scores (.fa). We also centered (.c) and standardized (.z) the mean scores. Most of these items are not yet labelled for SPSS.
Re-coding reverse coded items and the Qualtrics language codes.
# Recoded Items
# Reverse-code jbInsec02 and bor03 (response scale -2..2, values are negated).
# NOTE: dplyr::recode leaves unmatched numeric values unchanged, so the explicit
# `-99` = -99 mapping on jbInsec02 is redundant (bor03 safely omits it).
dt5newVars <- dt4Straightliner %>%
mutate(jbInsec02_R = labelled(recode(as.numeric(jbInsec02), `-2` = 2, `-1` = 1, `0` = 0, `1` = -1, `2` = -2, `-99` = -99),
labels = NULL, label="Job Insecurity 02 (re-coded)"),
bor03_R = labelled(recode(as.numeric(bor03), `-2` = 2, `-1` = 1, `0` = 0, `1` = -1, `2` = -2),
labels = NULL, label="Boredom 03 (re-coded)"))
# Language
# Import Qualtrics Language Codes
# Left join (all.x = TRUE) keeps every survey row, adding readable language info
# keyed on the Qualtrics Q_Language code.
qualtricsLanguage <- read_excel("data/raw data/qualtricsLanguageCodes.xlsx")
dt5newVars <- merge(x=dt5newVars, y=qualtricsLanguage, by="Q_Language", all.x=TRUE)
rm(qualtricsLanguage)
We currently have 982 different free text country responses. Here we aim to consolidate them into one variable.
# CJ: Just as a benchmark, geolocate IP address. Can always use this if a free
# CJ: text country response does not resolve.
library(rgeolocate)
# Geolocate each respondent's IP against the GeoLite2 country database bundled
# with the rgeolocate package.
file <- system.file("extdata","GeoLite2-Country.mmdb", package = "rgeolocate")
geolocations <- maxmind(dt5newVars$IPAddress, file, c("continent_name", "country_code", "country_name"))
dt5newVars$coded_country_ip <- geolocations$country_name
# CJ: This is a slightly cleaner, faster, and more informative regex based approach.
# CJ: The function and dictionary (based on the code below, some errors corrected)
# CJ: is in source("./scripts/functions/dictionary_functions.R")
# cat_words() and country_dict are project-local (see dictionary_functions.R).
country_matches <- cat_words(tolower(dt5newVars$country), country_dict)
# Check duplicates, and which regular expressions triggered them
invisible(country_matches$dup)
# Check unmatched strings, fix common ones
invisible(head(country_matches$unmatched))
dt5newVars$coded_country_cj <- country_matches$words
#code originally provided by Courtney Soderberg
# Consolidate free-text `country` responses into a single `coded_country`
# variable via ordered regex matching. case_when() is first-match-wins, so
# clause order matters (e.g. the 'roma' clause explicitly excludes 'romania').
# Patterns cover English names, native-language names, cities/regions, and
# misspellings observed in the raw data.
dt5newVars <- dt5newVars %>%
mutate(coded_country = case_when(grepl('^usa$', country, ignore.case = T) | grepl('unites state', country, ignore.case = T) |
grepl('united state', country, ignore.case = T) | grepl('^america$', country, ignore.case = T) |
grepl('U\\.S\\.', country, ignore.case = T) | grepl('Estados Unidos', country, ignore.case = T) |
grepl('colorado', country, ignore.case = T) | grepl('^us$', country, ignore.case = T) |
grepl('xas', country, ignore.case = T) | grepl('sates', country, ignore.case = T) |
grepl('Amerika Serikat', country, ignore.case = T) | grepl('california', country, ignore.case = T) |
grepl('corlifornia', country, ignore.case = T) | grepl('états-unis', country, ignore.case = T) |
grepl('york', country, ignore.case = T) | grepl('yark', country, ignore.case = T) |
grepl('puerto rico', country, ignore.case = T) | grepl('^tx$', country, ignore.case = T) |
grepl('^tn$', country, ignore.case = T) | grepl('U S', country, ignore.case = T) ~ 'United States of America',
grepl('canad', country, ignore.case = T) | grepl('vancouver', country, ignore.case = T) ~ 'Canada',
grepl('mexico', country, ignore.case = T) | grepl('México', country, ignore.case = T) ~ 'Mexico',
# NOTE(review): 'esp' is a very broad substring — verify it does not over-match.
grepl('spain', country, ignore.case = T) | grepl('esp', country, ignore.case = T) |
grepl('Spagna', country, ignore.case = T) | grepl('Spanien', country, ignore.case = T) |
grepl('Catal', country, ignore.case = T) | grepl('Euskal Herria', country, ignore.case = T) |
grepl('basque', country, ignore.case = T) | grepl('Eapaña', country, ignore.case = T) |
grepl('Esapaña', country, ignore.case = T) | grepl('madrid', country, ignore.case = T) |
grepl('Montalbán de Córdoba', country, ignore.case = T) | grepl('Pais vasco', country, ignore.case = T) |
grepl('Spanje', country, ignore.case = T) ~ 'Spain',
grepl('france', country, ignore.case = T) | grepl('Francia', country, ignore.case = T) |
grepl('Frankrijk', country, ignore.case = T) ~ 'France',
grepl('germany', country, ignore.case = T) | grepl('deutschland', country, ignore.case = T) |
grepl('Alemania', country, ignore.case = T) | grepl('germania', country, ignore.case = T) |
grepl('^Almanya$', country, ignore.case = T) | grepl('berlin', country, ignore.case = T) |
grepl('Duitsland', country, ignore.case = T) ~ 'Germany',
grepl('portugal', country, ignore.case = T) ~ 'Portugal',
grepl('weden', country, ignore.case = T) ~ 'Sweden',
grepl('netherland', country, ignore.case = T) | grepl('nederland', country, ignore.case = T) |
grepl('Niederlande', country, ignore.case = T) | grepl('Belanda', country, ignore.case = T) |
grepl('^NL$', country, ignore.case = T) | grepl('Olanda', country, ignore.case = T) |
grepl('Paesi Bassi', country, ignore.case = T) | grepl('bajos', country, ignore.case = T) |
grepl('Gelderland', country, ignore.case = T) | grepl('Hollanda', country, ignore.case = T) ~ 'Netherlands',
grepl('^indonesia$', country, ignore.case = T) | grepl('indonesian', country, ignore.case = T) |
grepl('kota Tarakan', country, ignore.case = T) | grepl('Imdonesia', country, ignore.case = T) |
grepl('Indònesia', country, ignore.case = T) | grepl('jakarta', country, ignore.case = T) ~ 'Indonesia',
grepl('ital', country, ignore.case = T) | grepl('Sardegna', country, ignore.case = T) |
grepl('Bisceglie', country, ignore.case = T) | grepl('Ladispoli', country, ignore.case = T) |
grepl('Castelforte', country, ignore.case = T) | grepl('milano', country, ignore.case = T) |
(grepl('roma', country, ignore.case = T) & !grepl('romania', country, ignore.case = T)) |
grepl('Dorgali', country, ignore.case = T) | grepl('bari', country, ignore.case = T) |
grepl('bologna', country, ignore.case = T) | grepl('Brescia', country, ignore.case = T) |
grepl('Cala gonone', country, ignore.case = T) | grepl('Chieti', country, ignore.case = T) |
grepl('Ferentino', country, ignore.case = T) | grepl('Frosinone', country, ignore.case = T) |
grepl('Gragnano lucca ', country, ignore.case = T) | grepl('Guidonia', country, ignore.case = T) |
grepl('Itaia', country, ignore.case = T) | grepl('İtalya', country, ignore.case = T) |
grepl('Mareno di Piave', country, ignore.case = T) | grepl('modena', country, ignore.case = T) |
grepl('Pellizzano', country, ignore.case = T) | grepl('Predazzo', country, ignore.case = T) |
grepl('Refrontolo', country, ignore.case = T) | grepl('Cosma e Damiano', country, ignore.case = T) |
grepl('Scalea', country, ignore.case = T) | grepl('Scauri', country, ignore.case = T) |
grepl('Segni', country, ignore.case = T) | grepl('SETTIMO VITTONE', country, ignore.case = T) |
grepl('Susegana', country, ignore.case = T) | grepl('Terralba', country, ignore.case = T) |
grepl('trento', country, ignore.case = T) | grepl('treviso', country, ignore.case = T) |
grepl('Tezze di Piave', country, ignore.case = T) | grepl('Valmontone', country, ignore.case = T) |
grepl('Vergato', country, ignore.case = T) | grepl('veneto', country, ignore.case = T) |
grepl('Gragnano lucca', country, ignore.case = T) ~ 'Italy',
grepl('hong kong', country, ignore.case = T) ~ 'Hong Kong S.A.R.',
grepl('phil', country, ignore.case = T) | grepl('Filipinas', country, ignore.case = T) ~ 'Philippines',
grepl('argentina', country, ignore.case = T) | grepl('arge', country, ignore.case = T) ~ 'Argentina',
grepl('pakistan', country, ignore.case = T) | grepl('Abbottabad', country, ignore.case = T) |
grepl('Peshawar', country, ignore.case = T) ~ 'Pakistan',
grepl('united kingdo', country, ignore.case = T) | grepl('^uk$', country, ignore.case = T) |
grepl('Reino Unido', country, ignore.case = T) | grepl('britain', country, ignore.case = T) |
grepl('Regno Unito', country, ignore.case = T) | grepl('u\\.k\\.', country, ignore.case = T) |
grepl('بريطانيا', country, ignore.case = T) | grepl('the uk', country, ignore.case = T) |
grepl('U K', country, ignore.case = T) | grepl('Verenigd Koninkrijk', country, ignore.case = T) |
grepl('Windsor', country, ignore.case = T) | grepl('scotland', country, ignore.case = T) |
grepl('england', country, ignore.case = T) | grepl('wales', country, ignore.case = T) |
grepl('İngiltere', country, ignore.case = T) | grepl('Northern Ireland', country, ignore.case = T) |
grepl('Egland', country, ignore.case = T) | grepl('^gb$', country, ignore.case = T) |
grepl('N Ireland', country, ignore.case = T) | grepl('Schotland', country, ignore.case = T) |
grepl('Scozia', country, ignore.case = T) ~ 'United Kingdom',
# NOTE(review): 'africa' matches ANY string containing "africa" — confirm such
# strings only ever refer to South Africa in the raw responses.
grepl('africa', country, ignore.case = T) | grepl('^SA$', country, ignore.case = T) |
grepl('Sudáfrica', country, ignore.case = T) | grepl('western cape', country, ignore.case = T) ~ 'South Africa',
grepl('^chile$', country, ignore.case = T) ~ 'Chile',
# FIX: 'Austrija' (Austria in Croatian/Serbian) was wrongly mapped to Australia
# here; it is now handled by the Austria clause below.
grepl('australia', country, ignore.case = T) ~ 'Australia',
grepl('colombia', country, ignore.case = T) ~ 'Colombia',
grepl('turkey', country, ignore.case = T) | grepl('tür', country, ignore.case = T) ~ 'Turkey',
grepl('taiwan', country, ignore.case = T) ~ 'Taiwan',
grepl('^Venezuela$', country, ignore.case = T) ~ 'Venezuela',
grepl('israel', country, ignore.case = T) | grepl('اللد', country, ignore.case = T) |
grepl('اسرائيل', country, ignore.case = T) | grepl('كفر قاسم', country, ignore.case = T) |
grepl('Isreal', country, ignore.case = T) | grepl('רמלה', country, ignore.case = T) ~ 'Israel',
grepl('greece', country, ignore.case = T) | grepl('Grecia', country, ignore.case = T) ~ 'Greece',
grepl('austria', country, ignore.case = T) | grepl('sterreich', country, ignore.case = T) |
grepl('Austrija', country, ignore.case = T) ~ 'Austria',
grepl('new zealand', country, ignore.case = T) | grepl('Neuseeland', country, ignore.case = T) ~ 'New Zealand',
grepl('Tuni', country, ignore.case = T) | grepl('تونس', country, ignore.case = T) ~ 'Tunisia',
grepl('Belg', country, ignore.case = T) | grepl('Bélgica', country, ignore.case = T) ~ 'Belgium',
grepl('China', country, ignore.case = T) ~ 'China',
grepl('cyp', country, ignore.case = T) ~ 'Cyprus',
grepl('Schweiz', country, ignore.case = T) | grepl('Suiza', country, ignore.case = T) |
grepl('Svizzera', country, ignore.case = T) | grepl('Zwitserland', country, ignore.case = T) |
grepl('switzerland', country, ignore.case = T) ~ 'Switzerland',
grepl('United Arab Emirates', country, ignore.case = T) | grepl('uae', country, ignore.case = T) ~ 'United Arab Emirates',
grepl('Croa', country, ignore.case = T) ~ 'Croatia',
grepl('india', country, ignore.case = T) ~ 'India',
grepl('algeri', country, ignore.case = T) | grepl('الجزائر', country, ignore.case = T) |
grepl('Algérie', country, ignore.case = T) ~ 'Algeria',
grepl('bulgaria', country, ignore.case = T) ~ 'Bulgaria',
grepl('Poland', country, ignore.case = T) | grepl('POLONIA', country, ignore.case = T) ~ 'Poland',
grepl('romania', country, ignore.case = T) ~ 'Romania',
grepl('singapore', country, ignore.case = T) ~ 'Singapore',
grepl('Srbija', country, ignore.case = T) | grepl('serbia', country, ignore.case = T) |
grepl('Србија', country, ignore.case = T) ~ 'Republic of Serbia',
grepl('czech', country, ignore.case = T) | grepl('checa', country, ignore.case = T) ~ 'Czech Republic',
grepl('lux', country, ignore.case = T) ~ 'Luxembourg',
grepl('slova', country, ignore.case = T) ~ 'Slovakia',
grepl('brazil', country, ignore.case = T) | grepl('brasil', country, ignore.case = T)~ 'Brazil',
grepl('^ireland$', country, ignore.case = T) | grepl('Irlanda', country, ignore.case = T) ~ 'Ireland',
grepl('japan', country, ignore.case = T) | grepl('Giappone', country, ignore.case = T) |
grepl('Japonya', country, ignore.case = T) ~ 'Japan',
grepl('Malay', country, ignore.case = T) ~ 'Malaysia',
grepl('nigeria', country, ignore.case = T) ~ 'Nigeria',
grepl('Riyad', country, ignore.case = T) | grepl('^Saudi arabia$', country, ignore.case = T) |
grepl('Arabia Saudita', country, ignore.case = T) | grepl('^saudi$', country, ignore.case = T) |
grepl('Kingdom of Saudia arabia', country, ignore.case = T) | grepl('KSA', country, ignore.case = T) |
grepl('k\\.s\\.a', country, ignore.case = T) | grepl('Arabie saoudite', country, ignore.case = T) |
grepl('الرياض', country, ignore.case = T) | grepl('السعودية', country, ignore.case = T) |
grepl('السعوديه', country, ignore.case = T) ~ 'Saudi Arabia',
grepl('^thailand$', country, ignore.case = T) ~ 'Thailand',
grepl('urug', country, ignore.case = T) ~ 'Uruguay',
grepl('costa', country, ignore.case = T) ~ 'Costa Rica',
grepl('ecuador', country, ignore.case = T) ~ 'Ecuador',
grepl('finland', country, ignore.case = T) ~ 'Finland',
grepl('guat', country, ignore.case = T) ~ 'Guatemala',
grepl('iceland', country, ignore.case = T) ~ 'Iceland',
grepl('iraq', country, ignore.case = T) | grepl('العراق', country, ignore.case = T) ~ 'Iraq',
grepl('iran', country, ignore.case = T) ~ 'Iran',
grepl('lebanon', country, ignore.case = T) | grepl('liban', country, ignore.case = T) ~ 'Lebanon',
grepl('norway', country, ignore.case = T) ~ 'Norway',
grepl('palestine', country, ignore.case = T) | grepl('فلسطين ', country, ignore.case = T) |
grepl('^فلسطين$', country, ignore.case = T) | grepl('الرملة', country, ignore.case = T) ~ 'Palestine',
grepl('peru', country, ignore.case = T) ~ 'Peru',
grepl('domin', country, ignore.case = T) ~ 'Dominican Republic',
grepl('albania', country, ignore.case = T) ~ 'Albania',
grepl('andorra', country, ignore.case = T) ~ 'Andorra',
grepl('bahrain', country, ignore.case = T) ~ 'Bahrain',
grepl('bangladesh', country, ignore.case = T) ~ 'Bangladesh',
grepl('botswana', country, ignore.case = T) ~ 'Botswana',
grepl('camer', country, ignore.case = T) ~ 'Cameroon',
grepl('المغرب', country, ignore.case = T) | grepl('Maroc', country, ignore.case = T) ~ 'Morocco',
grepl('jordan', country, ignore.case = T) ~ 'Jordan',
grepl('ليبيا', country, ignore.case = T) ~ 'Libya',
grepl('مصر', country, ignore.case = T) ~ 'Egypt',
grepl('mark', country, ignore.case = T) ~ 'Denmark',
grepl('salvador', country, ignore.case = T) ~ 'El Salvador',
grepl('estonia', country, ignore.case = T) ~ 'Estonia',
grepl('korea', country, ignore.case = T) | grepl('Güney Kore', country, ignore.case = T) ~ 'South Korea',
grepl('hungary', country, ignore.case = T) ~ 'Hungary',
grepl('maurice', country, ignore.case = T) ~ 'Mauritius',
grepl('jamaica', country, ignore.case = T) ~ 'Jamaica',
grepl('kenia', country, ignore.case = T) ~ 'Kenya',
grepl('laos', country, ignore.case = T) ~ 'Laos',
grepl('latvia', country, ignore.case = T) ~ 'Latvia',
grepl('malta', country, ignore.case = T) ~ 'Malta',
grepl('myanmar', country, ignore.case = T) ~ 'Myanmar',
grepl('nepal', country, ignore.case = T) ~ 'Nepal',
grepl('^oman$', country, ignore.case = T) ~ 'Oman',
grepl('qatar', country, ignore.case = T) ~ 'Qatar',
grepl('panam', country, ignore.case = T) ~ 'Panama',
grepl('tanzania', country, ignore.case = T) ~ 'United Republic of Tanzania',
grepl('vietnam', country, ignore.case = T) ~ 'Vietnam'))
# Tally the free-text country strings the recoding above did not resolve,
# so remaining unmatched spellings can be inspected and added to the patterns.
country_counts <- dt5newVars %>%
filter(is.na(coded_country)) %>%
dplyr::count(country) # equivalent to group_by(country) %>% tally()
Action needed:
We currently have 982 different free-text country responses. The most recent coding still leaves 1313 responses unconsolidated.
Political orientation was measured per language. We merge these variables here.
# clean-up country coding:
rm(country_counts)
# political orientation
# The political compass item exists once per survey language (one set of Pol*
# columns each); rowSums(..., na.rm = TRUE) collapses them into single variables.
# NOTE(review): rowSums with na.rm = TRUE yields 0 (not NA) when ALL inputs are
# NA, so respondents with no political-orientation data get 0 coordinates — confirm.
dt5newVars <- dt5newVars %>%
mutate(PolOrX = labelled(rowSums(dplyr::select(., ends_with("_x")), na.rm = T),
labels = NULL, label="Political Compass X-Coordinate"),
PolOrY = labelled(rowSums(dplyr::select(., ends_with("_y")), na.rm = T),
labels = NULL, label="Political Compass Y-Coordinate"),
PolOrAuthoritarianLeft = rowSums(dplyr::select(., ends_with("_Authoritarian_Left")), na.rm = T),
PolOrAuthoritarianLeftLab = dplyr::recode(PolOrAuthoritarianLeft, `1` = "Authoritarian Left", `0` = ""),
PolOrAuthoritarianRight = rowSums(dplyr::select(., ends_with("_Authoritarian_right")), na.rm = T),
PolOrAuthoritarianRightLab = dplyr::recode(PolOrAuthoritarianRight, `1` = "Authoritarian Right", `0` = ""),
PolOrLibertarianLeft = rowSums(dplyr::select(., ends_with("_Libertarian_Left")), na.rm = T),
PolOrLibertarianLeftLab = dplyr::recode(PolOrLibertarianLeft, `1` = "Libertarian Left", `0` = ""),
PolOrLibertarianRight = rowSums(dplyr::select(., ends_with("_Libertarian_Right")), na.rm = T),
PolOrLibertarianRightLab = dplyr::recode(PolOrLibertarianRight, `1` = "Libertarian Right", `0` = ""),
PolOrOther = rowSums(dplyr::select(., ends_with("_Other")), na.rm = T),
PolOrOtherLab = dplyr::recode(PolOrOther, `1` = "Other", `0` = ""),
# Concatenate the quadrant labels (at most one is non-empty per respondent)
PolOrCat = paste0(PolOrAuthoritarianLeftLab,
PolOrAuthoritarianRightLab,
PolOrLibertarianLeftLab,
PolOrLibertarianRightLab,
PolOrOtherLab),
PolOrCat = as.factor(na_if(PolOrCat, ""))) %>%
# drop all raw per-language Pol* columns, keeping only the merged variables
dplyr::select(-starts_with("Pol"),
PolOrX,
PolOrY,
PolOrCat)
attr(dt5newVars$PolOrCat,'label') <- 'Political Orientation Quadrant'
# High Arousal Negative
## Anger not measured in wave 1
# Scatterplot matrix of the two available items (affAnx, affNerv), then a
# unit-weighted mean score via psych::scoreItems (all keys +1, response range 1-5).
pairs.panels.new(dt5newVars %>% dplyr::select(affAnx, affNerv))
cat("<br>")
dt5newVars$affHighNeg.m <- scoreItems(keys=c(1,1), items = dt5newVars %>% dplyr::select(affAnx, affNerv), min = 1, max = 5)$scores
# Descriptives of the resulting scale score
as.data.frame(psych::describe(dt5newVars$affHighNeg.m, skew=F)) %>%
mutate(vars = "High Arousal Negative Affect") %>%
kable(., caption = "High Arousal Negative Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| High Arousal Negative Affect | 22140 | 2.893 | 0.9753 | 1 | 5 | 4 | 0.0066 |
# Derived scores: centered (.c), z-standardized (.z), and factor scores (.fa).
# Idiom fix: TRUE/FALSE instead of T/F (T and F are ordinary, reassignable bindings).
dt5newVars$affHighNeg.c <- scale(dt5newVars$affHighNeg.m, scale = FALSE, center = TRUE)
dt5newVars$affHighNeg.z <- scale(dt5newVars$affHighNeg.m, scale = TRUE)
dt5newVars$affHighNeg.fa <- fa(dt5newVars %>% dplyr::select(affAnx, affNerv))$scores
# Low Arousal Negative Affect
# Item analysis (Scale package): spearman correlations, Cronbach's alpha, and
# gls single-factor loadings for the affBor, affExh, affDepr items.
ia.affLowNeg <- dt5newVars %>%
dplyr::select(affBor, affExh, affDepr) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affLowNeg$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.57 . Furthermore, deleting item(s) 1 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-to-scale correlations and factor loadings
as.data.frame(Scale::ReportTable(ia.affLowNeg)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affDepr | 0.4667 | 0.8076 | 2.212 | 1.154 |
| affExh | 0.3811 | 0.5210 | 2.519 | 1.226 |
| affBor | 0.2932 | 0.3720 | 2.539 | 1.302 |
cat("<br>")
# Per-item descriptive statistics
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(affBor, affExh, affDepr))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Low Arousal Negative Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affBor | 16901 | 2.539 | 1.302 | 2 | 2.442 | 1.483 | 1 | 5 | 4 | 0.3597 | -1.0522 | 0.0100 |
| affExh | 16860 | 2.519 | 1.226 | 2 | 2.452 | 1.483 | 1 | 5 | 4 | 0.3142 | -0.9974 | 0.0094 |
| affDepr | 16890 | 2.212 | 1.154 | 2 | 2.096 | 1.483 | 1 | 5 | 4 | 0.6405 | -0.5863 | 0.0089 |
cat("<br>")
# Scatterplot matrix, then unit-weighted mean score (psych::scoreItems)
pairs.panels.new(dt5newVars %>% dplyr::select(affBor, affExh, affDepr))
cat("<br>")
dt5newVars$affLowNeg.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(affBor, affExh, affDepr), min = 1, max = 5)$scores
as.data.frame(psych::describe(dt5newVars$affLowNeg.m, skew=F)) %>%
mutate(vars = "Low Arousal Negative Affect") %>%
kable(., caption = "Low Arousal Negative Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Low Arousal Negative Affect | 22140 | 2.323 | 0.8046 | 1 | 5 | 4 | 0.0054 |
# Derived scores: centered (.c), z-standardized (.z), and factor scores (.fa).
# Idiom fix: TRUE/FALSE instead of T/F (T and F are ordinary, reassignable bindings).
dt5newVars$affLowNeg.c <- scale(dt5newVars$affLowNeg.m, scale = FALSE, center = TRUE)
dt5newVars$affLowNeg.z <- scale(dt5newVars$affLowNeg.m, scale = TRUE)
dt5newVars$affLowNeg.fa <- fa(dt5newVars %>% dplyr::select(affBor, affExh, affDepr))$scores
# Low Arousal Positive Affect
# Item analysis (Scale package) for the affCalm, affContent, affRel items.
ia.affLowPos <- dt5newVars %>%
dplyr::select(affCalm, affContent, affRel) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affLowPos$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.73 . Furthermore, deleting item(s) 2 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-to-scale correlations and factor loadings
as.data.frame(Scale::ReportTable(ia.affLowPos)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affRel | 0.6214 | 0.8062 | 2.553 | 1.093 |
| affCalm | 0.6099 | 0.7786 | 2.808 | 1.067 |
| affContent | 0.4476 | 0.5096 | 2.570 | 1.091 |
cat("<br>")
# Per-item descriptive statistics
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(affCalm, affContent, affRel))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Low Arousal Positive Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affCalm | 16881 | 2.808 | 1.067 | 3 | 2.831 | 1.483 | 1 | 5 | 4 | -0.0229 | -0.7370 | 0.0082 |
| affContent | 16846 | 2.570 | 1.091 | 3 | 2.548 | 1.483 | 1 | 5 | 4 | 0.1307 | -0.8139 | 0.0084 |
| affRel | 16864 | 2.553 | 1.093 | 3 | 2.525 | 1.483 | 1 | 5 | 4 | 0.1821 | -0.7993 | 0.0084 |
cat("<br>")
# Scatterplot matrix, then unit-weighted mean score (psych::scoreItems)
pairs.panels.new(dt5newVars %>% dplyr::select(affCalm, affContent, affRel))
cat("<br>")
dt5newVars$affLowPos.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(affCalm, affContent, affRel), min = 1, max = 5)$scores
as.data.frame(psych::describe(dt5newVars$affLowPos.m, skew=F)) %>%
mutate(vars = "Low Arousal Positive Affect") %>%
kable(., caption = "Low Arousal Positive Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Low Arousal Positive Affect | 22140 | 2.728 | 0.779 | 1 | 5 | 4 | 0.0052 |
# Derived scores: centered (.c), z-standardized (.z), and factor scores (.fa).
# Idiom fix: TRUE/FALSE instead of T/F (T and F are ordinary, reassignable bindings).
dt5newVars$affLowPos.c <- scale(dt5newVars$affLowPos.m, scale = FALSE, center = TRUE)
dt5newVars$affLowPos.z <- scale(dt5newVars$affLowPos.m, scale = TRUE)
dt5newVars$affLowPos.fa <- fa(dt5newVars %>% dplyr::select(affCalm, affContent, affRel))$scores
# High Arousal Positive Affect
# Item analysis (Scale package) for the affEnerg, affExc, affInsp items.
ia.affHighPos <- dt5newVars %>%
dplyr::select(affEnerg, affExc, affInsp) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
ia.affHighPos$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.68 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
# Item-to-scale correlations and factor loadings
as.data.frame(Scale::ReportTable(ia.affHighPos)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| affInsp | 0.5291 | 0.7205 | 2.323 | 1.136 |
| affEnerg | 0.4952 | 0.6447 | 2.421 | 1.074 |
| affExc | 0.4542 | 0.5694 | 2.097 | 1.099 |
cat("<br>")
# Per-item descriptive statistics
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(affEnerg, affExc, affInsp))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "High Arousal Positive Affect: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| affEnerg | 16862 | 2.421 | 1.074 | 2 | 2.369 | 1.483 | 1 | 5 | 4 | 0.2566 | -0.7370 | 0.0083 |
| affExc | 16834 | 2.097 | 1.099 | 2 | 1.967 | 1.483 | 1 | 5 | 4 | 0.6779 | -0.4938 | 0.0085 |
| affInsp | 16863 | 2.323 | 1.136 | 2 | 2.239 | 1.483 | 1 | 5 | 4 | 0.4316 | -0.7707 | 0.0087 |
cat("<br>")
# Scatterplot matrix, then unit-weighted mean score (psych::scoreItems)
pairs.panels.new(dt5newVars %>% dplyr::select(affEnerg, affExc, affInsp))
cat("<br>")
dt5newVars$affHighPos.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(affEnerg, affExc, affInsp), min = 1, max = 5)$scores
as.data.frame(psych::describe(dt5newVars$affHighPos.m, skew=F)) %>%
mutate(vars = "High Arousal Positive Affect") %>%
kable(., caption = "High Arousal Positive Affect: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| High Arousal Positive Affect | 22140 | 2.213 | 0.7636 | 1 | 5 | 4 | 0.0051 |
# Centered, standardized (z), and factor-score variants of the
# High Arousal Positive Affect mean scale computed above.
dt5newVars$affHighPos.c <- scale(dt5newVars$affHighPos.m, center = TRUE, scale = FALSE)
dt5newVars$affHighPos.z <- scale(dt5newVars$affHighPos.m, center = TRUE, scale = TRUE)
dt5newVars$affHighPos.fa <- fa(dt5newVars %>% dplyr::select(affEnerg, affExc, affInsp))$scores
# Loneliness: item analysis over all items whose names start with "lone"
# (spearman correlation matrix + reliability analysis).
ia.lone<- dt5newVars %>%
dplyr::select(starts_with("lone")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.lone$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.8 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.lone)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| lone01 | 0.6829 | 0.8197 | 2.340 | 1.130 |
| lone02 | 0.6438 | 0.7490 | 2.625 | 1.216 |
| lone03 | 0.6187 | 0.7086 | 1.941 | 1.088 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("lone")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Loneliness: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| lone01 | 16420 | 2.340 | 1.130 | 2 | 2.260 | 1.483 | 1 | 5 | 4 | 0.3903 | -0.7852 | 0.0088 |
| lone02 | 16408 | 2.625 | 1.216 | 3 | 2.578 | 1.483 | 1 | 5 | 4 | 0.1722 | -0.9937 | 0.0095 |
| lone03 | 16401 | 1.941 | 1.088 | 2 | 1.770 | 1.483 | 1 | 5 | 4 | 0.9577 | 0.0113 | 0.0085 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("lone")))
cat("<br>")
dt5newVars$lone.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(starts_with("lone")), min = 1, max = 5)$scores
as.data.frame(psych::describe(dt5newVars$lone.m, skew=F)) %>%
mutate(vars = "Loneliness") %>%
kable(., caption = "Loneliness: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Loneliness | 22140 | 2.31 | 0.8372 | 1 | 5 | 4 | 0.0056 |
# Centered, standardized (z), and factor-score variants of the Loneliness
# mean scale computed above.
dt5newVars$lone.c <- scale(dt5newVars$lone.m, center = TRUE, scale = FALSE)
dt5newVars$lone.z <- scale(dt5newVars$lone.m, center = TRUE, scale = TRUE)
dt5newVars$lone.fa <- fa(dt5newVars %>% dplyr::select(starts_with("lone")))$scores
# Boredom: item analysis over the bor0* items. The raw bor03 is excluded up
# front; its reversed version bor03_R is still included and evaluated below.
ia.bor<- dt5newVars %>%
dplyr::select(starts_with("bor0"), -bor03) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.bor$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.5 . Item(s) that exhibited low correlation with the rest of the scale were: 3 . Furthermore, deleting item(s) 3 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.bor)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| bor02 | 0.4836 | 0.7197 | 0.0578 | 1.874 |
| bor01 | 0.4529 | 0.7097 | 0.3892 | 1.900 |
| bor03_R | 0.0761 | 0.4390 | -0.2860 | 1.677 |
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("bor0"), -bor03))
Item dropped:
Item three (bor03_R) was not well behaved: its corrected item-total correlation was only 0.08, suggesting it measures a different construct than the other boredom items. We dropped it for now.
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("bor0"), -bor03, -bor03_R))
cat("<br>")
dt5newVars$bor.m <- scoreItems(keys=c(1,1), items = dt5newVars %>% dplyr::select(starts_with("bor0"), -bor03, -bor03_R), min = -3, max = 3)$scores
as.data.frame(psych::describe(dt5newVars$bor.m, skew=F)) %>%
mutate(vars = "Boredom") %>%
kable(., caption = "Boredom: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Boredom | 22140 | 0.1559 | 1.42 | -3 | 3 | 6 | 0.0095 |
dt5newVars$bor.c <- scale(dt5newVars$bor.m, scale = F, center = T)
dt5newVars$bor.z <- scale(dt5newVars$bor.m, scale = T)
dt5newVars$bor.fa <- fa(dt5newVars %>% dplyr::select(starts_with("bor0"), -bor03, -bor03_R))$scores
cat(crayon::bold("Offline Isolation"))
Offline Isolation
# Offline (in-person) isolation: item analysis over the *_inPerson items.
ia.isoPers <- dt5newVars %>%
dplyr::select(ends_with("inPerson")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.isoPers$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.56 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.isoPers)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| isoOthPpl_inPerson | 0.4466 | 0.7590 | 1.9215 | 2.204 |
| isoImmi_inPerson | 0.3545 | 0.4859 | 0.4502 | 1.357 |
| isoFriends_inPerson | 0.3226 | 0.4288 | 2.0844 | 2.490 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(ends_with("inPerson")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Isolation offline: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| isoFriends_inPerson | 16310 | 2.0844 | 2.490 | 1 | 1.7305 | 1.483 | 0 | 7 | 7 | 0.9653 | -0.5160 | 0.0195 |
| isoOthPpl_inPerson | 16220 | 1.9215 | 2.204 | 1 | 1.5538 | 1.483 | 0 | 7 | 7 | 1.0752 | 0.0131 | 0.0173 |
| isoImmi_inPerson | 16110 | 0.4502 | 1.357 | 0 | 0.0582 | 0.000 | 0 | 7 | 7 | 3.4731 | 11.7176 | 0.0107 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(ends_with("inPerson")))
cat("<br>")
dt5newVars$isoPers.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(ends_with("inPerson")), min = 0, max = 7)$scores
as.data.frame(psych::describe(dt5newVars$isoPers.m, skew=F)) %>%
mutate(vars = "Isolation offline") %>%
kable(., caption = "Isolation offline: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Isolation offline | 22140 | 1.267 | 1.345 | 0 | 7 | 7 | 0.009 |
dt5newVars$isoPers.c <- scale(dt5newVars$isoPers.m, scale = F, center = T)
dt5newVars$isoPers.z <- scale(dt5newVars$isoPers.m, scale = T)
dt5newVars$isoPers.fa <- fa(dt5newVars %>% dplyr::select(ends_with("inPerson")))$scores
cat(crayon::bold("Online Isolation"))
Online Isolation
# Online isolation: item analysis over the *_online items.
ia.isoOnl <- dt5newVars %>%
dplyr::select(ends_with("online")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.isoOnl$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.53 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.isoOnl)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| isoOthPpl_online | 0.4547 | 0.9247 | 3.0002 | 2.726 |
| isoFriends_online | 0.2990 | 0.3797 | 4.8748 | 2.346 |
| isoImmi_online | 0.2872 | 0.3620 | 0.7673 | 1.844 |
cat("<br>")
# Item descriptives for the ONLINE isolation items.
# BUG FIX: this previously selected ends_with("inPerson"), which duplicated
# the offline descriptives table under the "Isolation online" caption
# (visible in the rendered table below). The *_online items must be used here.
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(ends_with("online")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Isolation online: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| isoFriends_inPerson | 16310 | 2.0844 | 2.490 | 1 | 1.7305 | 1.483 | 0 | 7 | 7 | 0.9653 | -0.5160 | 0.0195 |
| isoOthPpl_inPerson | 16220 | 1.9215 | 2.204 | 1 | 1.5538 | 1.483 | 0 | 7 | 7 | 1.0752 | 0.0131 | 0.0173 |
| isoImmi_inPerson | 16110 | 0.4502 | 1.357 | 0 | 0.0582 | 0.000 | 0 | 7 | 7 | 3.4731 | 11.7176 | 0.0107 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(ends_with("online")))
cat("<br>")
# Mean scale across the three *_online items (possible range 0-7).
dt5newVars$isoOnl.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(ends_with("online")), min = 0, max = 7)$scores
# BUG FIX: the scale descriptives previously summarized isoPers.m (the
# OFFLINE scale) under the "Isolation online" caption; describe isoOnl.m.
as.data.frame(psych::describe(dt5newVars$isoOnl.m, skew=F)) %>%
mutate(vars = "Isolation online") %>%
kable(., caption = "Isolation online: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Isolation online | 22140 | 1.267 | 1.345 | 0 | 7 | 7 | 0.009 |
dt5newVars$isoOnl.c <- scale(dt5newVars$isoOnl.m, scale = F, center = T)
dt5newVars$isoOnl.z <- scale(dt5newVars$isoOnl.m, scale = T)
dt5newVars$isoOnl.fa <- fa(dt5newVars %>% dplyr::select(ends_with("online")))$scores
# Leave House
as.data.frame(psych::describe(dt5newVars$houseLeave, skew=F)) %>%
mutate(vars = "Leaving House") %>%
kable(., caption = "Leaving House: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Leaving House | 15538 | 2.434 | 1.066 | 1 | 4 | 3 | 0.0086 |
No responses yet
# extC19Msg
# Community response: item analysis over the extC19* items, excluding the
# extC19Msg item noted above.
ia.ext <- dt5newVars %>%
dplyr::select(starts_with("extC19"), -extC19Msg) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.ext$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.77 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.ext)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| extC19Rules | 0.7057 | 0.9249 | 4.076 | 1.451 |
| extC19Org | 0.5819 | 0.6703 | 3.763 | 1.443 |
| extC19Punish | 0.5413 | 0.6108 | 3.198 | 1.670 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("extC19"), -extC19Msg))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Community response: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| extC19Rules | 15789 | 4.076 | 1.451 | 4 | 4.168 | 1.483 | 1 | 6 | 5 | -0.4310 | -0.7039 | 0.0115 |
| extC19Punish | 15782 | 3.198 | 1.670 | 3 | 3.123 | 1.483 | 1 | 6 | 5 | 0.1874 | -1.1864 | 0.0133 |
| extC19Org | 15789 | 3.763 | 1.443 | 4 | 3.800 | 1.483 | 1 | 6 | 5 | -0.1957 | -0.8002 | 0.0115 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("extC19"), -extC19Msg))
cat("<br>")
dt5newVars$ext.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(starts_with("extC19"), -extC19Msg),
min = 1, max = 6)$scores
as.data.frame(psych::describe(dt5newVars$ext.m, skew=F)) %>%
mutate(vars = "Community response") %>%
kable(., caption = "Community response: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Community response | 22140 | 3.675 | 1.066 | 1 | 6 | 5 | 0.0072 |
dt5newVars$ext.c <- scale(dt5newVars$ext.m, scale = F, center = T)
dt5newVars$ext.z <- scale(dt5newVars$ext.m, scale = T)
dt5newVars$ext.fa <- fa(dt5newVars %>% dplyr::select(starts_with("extC19"), -extC19Msg))$scores
# Behavioral response: item analysis over the c19per* items.
ia.beh <- dt5newVars %>%
dplyr::select(starts_with("c19per")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.beh$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.68 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.beh)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| c19perBeh02 | 0.6014 | 0.8971 | 2.616 | 0.8676 |
| c19perBeh03 | 0.4663 | 0.5656 | 2.072 | 1.3721 |
| c19perBeh01 | 0.4302 | 0.5106 | 2.464 | 0.9582 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("c19per")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Behavioral response: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| c19perBeh01 | 15882 | 2.464 | 0.9582 | 3 | 2.680 | 0 | -3 | 3 | 6 | -2.855 | 10.696 | 0.0076 |
| c19perBeh02 | 15885 | 2.616 | 0.8676 | 3 | 2.809 | 0 | -3 | 3 | 6 | -3.616 | 16.716 | 0.0069 |
| c19perBeh03 | 15887 | 2.072 | 1.3721 | 3 | 2.375 | 0 | -3 | 3 | 6 | -1.874 | 3.305 | 0.0109 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("c19per")))
cat("<br>")
dt5newVars$beh.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(starts_with("c19per")),
min = -3, max = 3)$scores
as.data.frame(psych::describe(dt5newVars$beh.m, skew=F)) %>%
mutate(vars = "Behavioral response") %>%
kable(., caption = "Behavioral response: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Behavioral response | 22140 | 2.558 | 0.7658 | -3 | 3 | 6 | 0.0051 |
dt5newVars$beh.c <- scale(dt5newVars$beh.m, scale = F, center = T)
dt5newVars$beh.z <- scale(dt5newVars$beh.m, scale = T)
dt5newVars$beh.fa <- fa(dt5newVars %>% dplyr::select(starts_with("c19per")))$scores
as.data.frame(psych::describe(dt5newVars$c19Hope)) %>%
mutate(vars = "Hope") %>%
kable(., caption = "Hope: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Hope | 15939 | 1.096 | 1.6 | 1 | 1.279 | 1.483 | -3 | 3 | 6 | -0.8269 | -0.1207 | 0.0127 |
ggplot(dt5newVars, aes(x = c19Hope)) +
geom_histogram(binwidth=1, alpha=0.5) +
#geom_density(alpha=0.6)+
labs(title="Hope distribution",x="Corona Virus Hope", y = "Frequency") +
theme_Publication()
as.data.frame(psych::describe(dt5newVars$c19Eff)) %>%
mutate(vars = "Efficacy") %>%
kable(., caption = "Efficacy: Item Descriptive", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Efficacy | 15934 | 0.7955 | 1.654 | 1 | 0.9245 | 1.483 | -3 | 3 | 6 | -0.6409 | -0.473 | 0.0131 |
ggplot(dt5newVars, aes(x = c19Eff)) +
geom_histogram(binwidth=1, alpha=0.5) +
#geom_density(alpha=0.6)+
labs(title="Efficacy distribution",x="Corona Virus Efficacy", y = "Frequency") +
theme_Publication()
# State Paranoia: item analysis over the para* items.
ia.para <- dt5newVars %>%
dplyr::select(starts_with("para")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.para$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.7 . Furthermore, deleting item(s) 1 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.para)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| para02 | 0.6219 | 0.8830 | 2.395 | 2.591 |
| para03 | 0.5557 | 0.7007 | 2.305 | 2.597 |
| para01 | 0.3854 | 0.4379 | 5.356 | 3.002 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("para")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "State Paranoia: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| para01 | 15175 | 5.356 | 3.002 | 6 | 5.438 | 2.965 | 0 | 10 | 10 | -0.2625 | -0.9413 | 0.0244 |
| para02 | 15167 | 2.395 | 2.591 | 2 | 2.009 | 2.965 | 0 | 10 | 10 | 1.0120 | 0.2084 | 0.0210 |
| para03 | 15162 | 2.305 | 2.597 | 1 | 1.900 | 1.483 | 0 | 10 | 10 | 1.0575 | 0.2242 | 0.0211 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("para")))
cat("<br>")
dt5newVars$para.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(starts_with("para")),
min = 0, max = 10)$scores
as.data.frame(psych::describe(dt5newVars$para.m, skew=F)) %>%
mutate(vars = "State Paranoia") %>%
kable(., caption = "State Paranoia: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| State Paranoia | 22140 | 3.241 | 1.795 | 0 | 10 | 10 | 0.0121 |
dt5newVars$para.c <- scale(dt5newVars$para.m, scale = F, center = T)
dt5newVars$para.z <- scale(dt5newVars$para.m, scale = T)
dt5newVars$para.fa <- fa(dt5newVars %>% dplyr::select(starts_with("para")))$scores
# Conspiracy Theory beliefs: item analysis over the consp* items.
ia.consp <- dt5newVars %>%
dplyr::select(starts_with("consp")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.consp$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.7 . Furthermore, deleting item(s) 3 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.consp)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| consp02 | 0.6002 | 0.8209 | 7.200 | 2.476 |
| consp01 | 0.5796 | 0.7630 | 6.893 | 2.621 |
| consp03 | 0.3757 | 0.4279 | 5.277 | 2.726 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("consp")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Conspiracy Theory: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| consp01 | 14892 | 6.893 | 2.621 | 7 | 7.173 | 2.965 | 0 | 10 | 10 | -0.6862 | -0.2324 | 0.0215 |
| consp02 | 14877 | 7.200 | 2.476 | 8 | 7.503 | 2.965 | 0 | 10 | 10 | -0.8609 | 0.1864 | 0.0203 |
| consp03 | 14877 | 5.277 | 2.726 | 5 | 5.317 | 2.965 | 0 | 10 | 10 | -0.1088 | -0.7469 | 0.0224 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("consp")))
cat("<br>")
dt5newVars$consp.m <- scoreItems(keys=c(1,1,1), items = dt5newVars %>% dplyr::select(starts_with("consp")),
min = 0, max = 10)$scores
as.data.frame(psych::describe(dt5newVars$consp.m, skew=F)) %>%
mutate(vars = "Conspiracy Theory") %>%
kable(., caption = "Conspiracy Theory: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Conspiracy Theory | 22140 | 6.526 | 1.694 | 0 | 10 | 10 | 0.0114 |
# Centered, standardized (z), and factor-score variants of the Conspiracy
# Theory mean scale.
# BUG FIX: these three assignments previously wrote to para.c / para.z /
# para.fa, silently overwriting the State Paranoia scores computed earlier
# with Conspiracy Theory values. Store them under consp.* instead.
dt5newVars$consp.c <- scale(dt5newVars$consp.m, scale = F, center = T)
dt5newVars$consp.z <- scale(dt5newVars$consp.m, scale = T)
dt5newVars$consp.fa <- fa(dt5newVars %>% dplyr::select(starts_with("consp")))$scores
# Job insecurity: item analysis over the jbInsec* items (minus the raw
# jbInsec02, replaced by its reversed version, and jbInsec04).
# The -99 "not applicable" sentinel is recoded to NA before analysis.
ia.jobinsec<- dt5newVars %>%
dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>%
na_if(., -99) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.jobinsec$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.85 .
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.jobinsec)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| jbInsec01 | 0.7559 | 0.8731 | -0.7854 | 1.168 |
| jbInsec03 | 0.7102 | 0.7952 | -0.0872 | 1.374 |
| jbInsec02_R | 0.6822 | 0.7527 | -0.7252 | 1.157 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Job insecurity: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| jbInsec01 | 12033 | -0.7854 | 1.168 | -1 | -0.9279 | 1.483 | -2 | 2 | 4 | 0.7725 | -0.2330 | 0.0106 |
| jbInsec03 | 13081 | -0.0872 | 1.374 | 0 | -0.1090 | 1.483 | -2 | 2 | 4 | 0.0364 | -1.2885 | 0.0120 |
| jbInsec02_R | 12439 | -0.7252 | 1.157 | -1 | -0.8496 | 1.483 | -2 | 2 | 4 | 0.6760 | -0.3479 | 0.0104 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))
cat("<br>")
dt5newVars$jobinsec.m <- scoreItems(keys=c(1,1,1),
items = dt5newVars %>% dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99),
min = -2, max = 2)$scores
as.data.frame(psych::describe(dt5newVars$jobinsec.m, skew=F)) %>%
mutate(vars = "Job insecurity") %>%
kable(., caption = "Job insecurity: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Job insecurity | 22140 | -0.5935 | 0.7891 | -2 | 2 | 4 | 0.0053 |
# Centered, standardized (z), and factor-score variants of the Job
# insecurity mean scale.
dt5newVars$jobinsec.c <- scale(dt5newVars$jobinsec.m, scale = F, center = T)
dt5newVars$jobinsec.z <- scale(dt5newVars$jobinsec.m, scale = T)
# BUG FIX: recode the -99 ("not applicable") sentinel to NA before the factor
# analysis, consistent with the item analysis and scoreItems() calls above;
# previously the raw -99 values entered the factor scores.
dt5newVars$jobinsec.fa <- fa(dt5newVars %>% dplyr::select(starts_with("jbInsec"), -jbInsec02, -jbInsec04) %>% na_if(., -99))$scores
# Financial Strain: item analysis over the PFS0* items.
ia.pfs<- dt5newVars %>%
dplyr::select(starts_with("PFS0")) %>%
Scale::Scale() %>%
Scale::ItemAnalysis()
# print the reliability summary (Cronbach's alpha)
ia.pfs$rely
Reliability Analysis of . ScaleData object.
A spearman correlation matrix of 3 items was calculated and submitted to Reliability analysis.
The overall Cronbach’s Alpha was 0.85 . Furthermore, deleting item(s) 2 may improve reliability.
cat("<br><br>A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:")
A gls factor analysis was conducted. Items were regressed to a single factor. Their loadings are the following:
as.data.frame(Scale::ReportTable(ia.pfs)) %>%
kable(., row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| Item | Corr. to scale | Factor Loading | Mean | SD |
|---|---|---|---|---|
| PFS01 | 0.7983 | 0.9333 | -0.2230 | 1.238 |
| PFS03 | 0.7589 | 0.8586 | -0.4758 | 1.241 |
| PFS02 | 0.6157 | 0.6522 | 0.4220 | 1.205 |
cat("<br>")
as.data.frame(psych::describe(dt5newVars %>% dplyr::select(starts_with("PFS0")))) %>%
mutate(vars = rownames(.)) %>%
kable(., caption = "Financial Strain: Item Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | median | trimmed | mad | min | max | range | skew | kurtosis | se |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| PFS01 | 16367 | -0.2230 | 1.238 | 0 | -0.2711 | 1.483 | -2 | 2 | 4 | 0.1712 | -1.0222 | 0.0097 |
| PFS02 | 16378 | 0.4220 | 1.205 | 1 | 0.5161 | 1.483 | -2 | 2 | 4 | -0.5615 | -0.6736 | 0.0094 |
| PFS03 | 16374 | -0.4758 | 1.241 | -1 | -0.5645 | 1.483 | -2 | 2 | 4 | 0.4389 | -0.8831 | 0.0097 |
cat("<br>")
pairs.panels.new(dt5newVars %>% dplyr::select(starts_with("PFS0")))
cat("<br>")
# Mean scale across the three PFS items (possible range -2 to 2).
dt5newVars$pfs.m <- scoreItems(keys=c(1,1,1),
items = dt5newVars %>% dplyr::select(starts_with("PFS0")),
min = -2, max = 2)$scores
# TYPO FIX: label and caption previously read "inancial Strain".
as.data.frame(psych::describe(dt5newVars$pfs.m, skew=F)) %>%
mutate(vars = "Financial Strain") %>%
kable(., caption = "Financial Strain: Scale Descriptives", row.names = FALSE) %>%
kable_styling("hover", full_width = F, latex_options = "hold_position")
| vars | n | mean | sd | min | max | range | se |
|---|---|---|---|---|---|---|---|
| Financial Strain | 22140 | -0.0682 | 0.9295 | -2 | 2 | 4 | 0.0062 |
# Centered, standardized (z), and factor-score variants of the Financial
# Strain mean scale computed above.
dt5newVars$pfs.c <- scale(dt5newVars$pfs.m, center = TRUE, scale = FALSE)
dt5newVars$pfs.z <- scale(dt5newVars$pfs.m, center = TRUE, scale = TRUE)
dt5newVars$pfs.fa <- fa(dt5newVars %>% dplyr::select(starts_with("PFS0")))$scores
# clean-up Item Analyses
# BUG FIX: pattern "ia" removes ANY object whose name merely contains "ia"
# anywhere; anchor the regex to the "ia." prefix used by the item-analysis
# objects (ia.affHighPos, ia.lone, ...) so only those are deleted.
rm(list = ls(pattern = "^ia\\."))
# remove directly identifiable data (with and without page timers)
# dt6ReducedTimer: full data minus directly identifying Qualtrics metadata
# (IP, names, email, geolocation, distribution channel, free-text consent).
dt6ReducedTimer <- dt5newVars %>%
dplyr::select(-c(IPAddress,
RecipientLastName,
RecipientFirstName,
RecipientEmail,
ExternalReference,
LocationLatitude,
LocationLongitude,
DistributionChannel,
ICRec_1_TEXT))
# dt6Reduced: same, additionally dropping the page-timer columns ("t_" prefix).
dt6Reduced <- dt6ReducedTimer %>%
dplyr::select(-starts_with("t_"))
# remove filtered cases (with and without page timers)
# Keep only rows passing all three quality filters (0 = keep), then drop the
# helper Filter* columns from the exported data.
dt6ReducedTimerCases <- dt6ReducedTimer %>%
filter(FilterPreview == 0,
FilterTime == 0,
FilterStraightliner == 0) %>%
dplyr::select(-starts_with("Filter"))
dt6ReducedCases <- dt6Reduced %>%
filter(FilterPreview == 0,
FilterTime == 0,
FilterStraightliner == 0) %>%
dplyr::select(-starts_with("Filter"))
Export main dataframe as RData and SPSS sav files. We export versions with and without page timers
# Build export file names for the SPSS (.sav) and RData versions, with and
# without page timers.
# IMPROVEMENT: compute the timestamp once so all four file names are
# guaranteed identical even if these calls straddle a minute boundary;
# previously Sys.time() was evaluated separately for each name.
namStamp <- format(Sys.time(), format = "%F %H-%M %Z")
namSPSS <- paste0("data/cleaned data/Psycorona Baseline cleaned ", namStamp, ".sav")
namR <- paste0("data/cleaned data/Psycorona Baseline cleaned ", namStamp, ".RData")
namTSPSS <- paste0("data/cleaned data/Psycorona Baseline cleaned with page timer ", namStamp, ".sav")
namTR <- paste0("data/cleaned data/Psycorona Baseline cleaned with page timer ", namStamp, ".RData")
# SPSS exports
write_sav(dt6Reduced, namSPSS)
write_sav(dt6ReducedTimer, namTSPSS)
# RData exports
save(dt6Reduced, file = namR)
save(dt6ReducedTimer, file = namTR)
# drop the temporary name objects (namStamp included via the "nam" pattern)
rm(list=ls(pattern="nam"))
# export for Shiny
# NOTE(review): this path assumes the PsyCorona-WebApp repository is checked
# out as a sibling directory of this project — confirm before running.
saveRDS(dt6ReducedCases, file = "../PsyCorona-WebApp/data/reducedData.rds")